import warnings
warnings.filterwarnings('ignore')
# Import libraries
import pandas as pd
import numpy as np
import keras
import tensorflow as tf
from numpy import array
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.preprocessing.sequence import TimeseriesGenerator
import os
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import optuna
from optuna import Trial
from sklearn import metrics
from keras.callbacks import EarlyStopping,ReduceLROnPlateau
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from pandas_datareader import data as pdr
import yfinance as yfin
yfin.pdr_override()
data=pdr.get_data_yahoo('MSFT','2012-05-21','2023-02-28')
data=data[['Adj Close']]
data.columns=[['MSFT']]
datapacf=data.diff()
datapacf=datapacf[1:]
import matplotlib as mpl
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_pacf
with mpl.rc_context():
    mpl.rc("figure", figsize=(10,10))
    plot_pacf(datapacf, lags=50)
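# The 9 lags used below can also be checked numerically: a sketch (not part of the
# original notebook) that lists the lags whose partial autocorrelation of the
# differenced series falls outside an approximate 95% white-noise band.
from statsmodels.tsa.stattools import pacf
pacf_vals = pacf(datapacf.squeeze(), nlags=50)
band = 1.96 / np.sqrt(len(datapacf))
significant_lags = [lag for lag, v in enumerate(pacf_vals) if lag > 0 and abs(v) > band]
print(significant_lags)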
def shift_data(df, serie_name, period):
    # Append `period` lagged copies of the series as new columns: serie_name.1 ... serie_name.period
    for i in range(1, period + 1):
        last = len(df.columns)
        df.insert(last, serie_name + '.' + str(i), df[serie_name].shift(periods=i))
shift_data(data,'MSFT',9)
data=data.dropna()
data_shift=data.copy()
data_shift
| Date | MSFT | MSFT.1 | MSFT.2 | MSFT.3 | MSFT.4 | MSFT.5 | MSFT.6 | MSFT.7 | MSFT.8 | MSFT.9 |
|---|---|---|---|---|---|---|---|---|---|---|
| 2012-06-04 | 23.138041 | 23.057009 | 23.656725 | 23.778301 | 23.956589 | 23.551367 | 23.559479 | 23.591892 | 24.118683 | 24.110573 |
| 2012-06-05 | 23.105625 | 23.138041 | 23.057009 | 23.656725 | 23.778301 | 23.956589 | 23.551367 | 23.559479 | 23.591892 | 24.118683 |
| 2012-06-06 | 23.786394 | 23.105625 | 23.138041 | 23.057009 | 23.656725 | 23.778301 | 23.956589 | 23.551367 | 23.559479 | 23.591892 |
| 2012-06-07 | 23.689150 | 23.786394 | 23.105625 | 23.138041 | 23.057009 | 23.656725 | 23.778301 | 23.956589 | 23.551367 | 23.559479 |
| 2012-06-08 | 24.029530 | 23.689150 | 23.786394 | 23.105625 | 23.138041 | 23.057009 | 23.656725 | 23.778301 | 23.956589 | 23.551367 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 2023-02-21 | 252.669998 | 258.059998 | 262.149994 | 269.320007 | 271.490021 | 270.642120 | 262.442657 | 262.961365 | 266.063599 | 266.891510 |
| 2023-02-22 | 251.509995 | 252.669998 | 258.059998 | 262.149994 | 269.320007 | 271.490021 | 270.642120 | 262.442657 | 262.961365 | 266.063599 |
| 2023-02-23 | 254.770004 | 251.509995 | 252.669998 | 258.059998 | 262.149994 | 269.320007 | 271.490021 | 270.642120 | 262.442657 | 262.961365 |
| 2023-02-24 | 249.220001 | 254.770004 | 251.509995 | 252.669998 | 258.059998 | 262.149994 | 269.320007 | 271.490021 | 270.642120 | 262.442657 |
| 2023-02-27 | 250.160004 | 249.220001 | 254.770004 | 251.509995 | 252.669998 | 258.059998 | 262.149994 | 269.320007 | 271.490021 | 270.642120 |

2701 rows × 10 columns
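# For clarity, the lag construction above boils down to pandas shift(): column MSFT.k
# holds the adjusted close observed k trading days earlier. A toy illustration with
# made-up numbers (names here are illustrative only, not from the notebook):
toy = pd.DataFrame({'price': [10.0, 11.0, 12.0, 13.0]})
for k in range(1, 3):
    toy[f'price.{k}'] = toy['price'].shift(k)
print(toy.dropna())
#    price  price.1  price.2
# 2   12.0     11.0     10.0
# 3   13.0     12.0     11.0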
split_percent = 0.80
# hold out the last 20% for testing
splittest = int(split_percent*(len(data_shift)))
trainall=data_shift.iloc[:splittest,:]
scalerall = MinMaxScaler()
trainall_scaled = pd.DataFrame(scalerall.fit_transform(trainall), columns=trainall.columns)
xtrainall=trainall_scaled.iloc[:,1:]
ytrainall=trainall_scaled.iloc[:,0]
test=data_shift.iloc[splittest:,:]
test_scaled = pd.DataFrame(scalerall.transform(test), columns=test.columns)
xtest=test_scaled.iloc[:,1:]
ytest=test_scaled.iloc[:,0]
# from the 80% training split, use 20% for neural-network validation
splittrain = int(split_percent*(len(data_shift[:splittest])))
train=data_shift.iloc[:splittrain,:]
scalerfirst = MinMaxScaler()
train_scaled = pd.DataFrame(scalerfirst.fit_transform(train), columns=train.columns)
xtrain=train_scaled.iloc[:,1:]
ytrain=train_scaled.iloc[:,0]
val=data_shift.iloc[splittrain:splittest,:]
val_scaled=pd.DataFrame(scalerfirst.transform(val), columns=val.columns)
xval=val_scaled.iloc[:,1:]
yval=val_scaled.iloc[:,0]
print(xtrain.shape[0])
print(xval.shape[0])
print(xtest.shape[0])
print(xtrainall.shape[0])
1728
432
541
2160
traindates=train.index
testdates=test.index
valdates=val.index
traindatesall=trainall.index
from sklearn.neighbors import LocalOutlierFactor
# Remove outliers from the training sets with Local Outlier Factor (LOF); samples flagged -1 are dropped
lof = LocalOutlierFactor()
yhat = lof.fit_predict(xtrain)
mask = yhat != -1
xtrain_no, ytrain_no, traindates = xtrain[mask], ytrain[mask], traindates[mask]
lof = LocalOutlierFactor()
yhat = lof.fit_predict(xtrainall)
mask = yhat != -1
xtrainall_no, ytrainall_no, traindatesall_no = xtrainall[mask], ytrainall[mask], traindatesall[mask]
ytrain_n=ytrain_no.to_numpy().reshape(-1,1,1)
xtrain_n=xtrain_no.to_numpy().reshape(-1,1,train.shape[1]-1)
yval_n=yval.to_numpy().reshape(-1,1,1)
xval_n=xval.to_numpy().reshape(-1,1,val.shape[1]-1)
ytest_n=ytest.to_numpy().reshape(-1,1,1)
xtest_n=xtest.to_numpy().reshape(-1,1,test.shape[1]-1)
ytrainall_n=ytrainall_no.to_numpy().reshape(-1,1,1)
xtrainall_n=xtrainall_no.to_numpy().reshape(-1,1,trainall.shape[1]-1)
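# Quick sanity check on the tensors fed to the LSTM (a sketch; the last dimension is
# the 9 lag features and the middle one is a single timestep):
print(xtrain_n.shape, ytrain_n.shape)   # expected: (n_train, 1, 9) and (n_train, 1, 1)
print(xtest_n.shape, ytest_n.shape)     # expected: (541, 1, 9) and (541, 1, 1)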
import random
seed = 248
def random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)
random_seed(seed)
def objective(trial):
    keras.backend.clear_session()
    # Search the number of stacked LSTM layers plus per-layer units, activation and dropout
    n_layers = trial.suggest_int('n_layers', 1, 4)
    model = keras.Sequential()
    for i in range(n_layers):
        num_hidden = trial.suggest_int(f'n_units_l{i}', train.shape[1]-1, 400, log=True)
        model.add(keras.layers.LSTM(num_hidden, input_shape=(1, train.shape[1]-1), return_sequences=True,
                                    activation=trial.suggest_categorical(f'activation{i}', ['relu', 'linear', 'swish', 'sigmoid'])))
        model.add(keras.layers.Dropout(rate=trial.suggest_float(f'dropout{i}', 0.0, 0.5)))
    model.add(keras.layers.Dense(1, activation=trial.suggest_categorical('finalact1', ['relu', 'linear', 'swish', 'sigmoid'])))
    val_ds = (xval_n, yval_n)
    # The learning-rate schedule is tuned as well
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=trial.suggest_float('LRfactor', 0.0, 0.5),
                                  patience=trial.suggest_int('LRpatience', 5, 20), min_lr=1e-05, verbose=0)
    model.compile(loss='mse', optimizer=trial.suggest_categorical('optimizer', ['Adagrad', 'adam', 'sgd', 'RMSprop']))
    run_history = model.fit(xtrain_n, ytrain_n, validation_data=val_ds, epochs=50, callbacks=[reduce_lr], verbose=0)
    return min(run_history.history['val_loss'])
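# Optionally, unpromising trials could be stopped early with Optuna pruning (a sketch, assuming
# the Keras integration callback is available in the installed Optuna version). The study would
# be created with a pruner, and inside objective() the fit call would add
# optuna.integration.TFKerasPruningCallback(trial, 'val_loss') to its callbacks list.
pruned_study = optuna.create_study(direction="minimize", pruner=optuna.pruners.MedianPruner(n_warmup_steps=10))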
study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=60, timeout=1800)
print("Number of finished trials: {}".format(len(study.trials)))
print("Best trial:")
trial = study.best_trial
print(" Value: {}".format(trial.value))
[I 2023-05-07 11:07:43,672] A new study created in memory with name: no-name-e3c1d798-6f1f-4874-b43e-220e02d2c5df [I 2023-05-07 11:07:56,630] Trial 0 finished with value: 0.005559791345149279 and parameters: {'n_layers': 3, 'n_units_l0': 67, 'activation0': 'swish', 'dropout0': 0.16867294409753014, 'n_units_l1': 9, 'activation1': 'linear', 'dropout1': 0.12941427125801985, 'n_units_l2': 114, 'activation2': 'relu', 'dropout2': 0.46383356511544543, 'finalact1': 'linear', 'LRfactor': 0.29329143964234855, 'LRpatience': 5, 'optimizer': 'adam'}. Best is trial 0 with value: 0.005559791345149279. [I 2023-05-07 11:08:08,896] Trial 1 finished with value: 0.09979862719774246 and parameters: {'n_layers': 2, 'n_units_l0': 26, 'activation0': 'sigmoid', 'dropout0': 0.12967712228257738, 'n_units_l1': 155, 'activation1': 'swish', 'dropout1': 0.1923781758528202, 'finalact1': 'relu', 'LRfactor': 0.3331169054832027, 'LRpatience': 15, 'optimizer': 'adam'}. Best is trial 0 with value: 0.005559791345149279. [I 2023-05-07 11:08:28,546] Trial 2 finished with value: 0.0045430320315063 and parameters: {'n_layers': 3, 'n_units_l0': 11, 'activation0': 'linear', 'dropout0': 0.34775348112136684, 'n_units_l1': 132, 'activation1': 'swish', 'dropout1': 0.3059737002615789, 'n_units_l2': 281, 'activation2': 'sigmoid', 'dropout2': 0.13668291183744258, 'finalact1': 'linear', 'LRfactor': 0.0168970887743784, 'LRpatience': 20, 'optimizer': 'adam'}. Best is trial 2 with value: 0.0045430320315063. [I 2023-05-07 11:08:36,528] Trial 3 finished with value: 0.0060798698104918 and parameters: {'n_layers': 1, 'n_units_l0': 117, 'activation0': 'swish', 'dropout0': 0.42468225747838806, 'finalact1': 'linear', 'LRfactor': 0.3621225828085472, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 2 with value: 0.0045430320315063. [I 2023-05-07 11:08:48,638] Trial 4 finished with value: 0.43830081820487976 and parameters: {'n_layers': 3, 'n_units_l0': 24, 'activation0': 'swish', 'dropout0': 0.06121504157253188, 'n_units_l1': 10, 'activation1': 'swish', 'dropout1': 0.024430547269137515, 'n_units_l2': 74, 'activation2': 'sigmoid', 'dropout2': 0.12209370657166052, 'finalact1': 'sigmoid', 'LRfactor': 0.0923395426850721, 'LRpatience': 5, 'optimizer': 'adam'}. Best is trial 2 with value: 0.0045430320315063. [I 2023-05-07 11:09:21,571] Trial 5 finished with value: 0.00322540826164186 and parameters: {'n_layers': 4, 'n_units_l0': 293, 'activation0': 'linear', 'dropout0': 0.17709543513731246, 'n_units_l1': 332, 'activation1': 'relu', 'dropout1': 0.4145018060329815, 'n_units_l2': 87, 'activation2': 'swish', 'dropout2': 0.3250147462615566, 'n_units_l3': 32, 'activation3': 'linear', 'dropout3': 0.39398961702356167, 'finalact1': 'swish', 'LRfactor': 0.22823480151901093, 'LRpatience': 9, 'optimizer': 'RMSprop'}. Best is trial 5 with value: 0.00322540826164186. [I 2023-05-07 11:09:45,377] Trial 6 finished with value: 0.045286376029253006 and parameters: {'n_layers': 4, 'n_units_l0': 347, 'activation0': 'relu', 'dropout0': 0.14654359625056673, 'n_units_l1': 47, 'activation1': 'swish', 'dropout1': 0.31112242843688354, 'n_units_l2': 28, 'activation2': 'linear', 'dropout2': 0.46062351252172223, 'n_units_l3': 22, 'activation3': 'swish', 'dropout3': 0.45484717952752185, 'finalact1': 'linear', 'LRfactor': 0.378644114258581, 'LRpatience': 10, 'optimizer': 'RMSprop'}. Best is trial 5 with value: 0.00322540826164186. 
[I 2023-05-07 11:10:02,880] Trial 7 finished with value: 0.21045361459255219 and parameters: {'n_layers': 3, 'n_units_l0': 12, 'activation0': 'sigmoid', 'dropout0': 0.21801532294699283, 'n_units_l1': 229, 'activation1': 'swish', 'dropout1': 0.11907009633931348, 'n_units_l2': 43, 'activation2': 'relu', 'dropout2': 0.06796709152677755, 'finalact1': 'relu', 'LRfactor': 0.34780103825215325, 'LRpatience': 16, 'optimizer': 'adam'}. Best is trial 5 with value: 0.00322540826164186. [I 2023-05-07 11:10:26,327] Trial 8 finished with value: 0.025906680151820183 and parameters: {'n_layers': 3, 'n_units_l0': 287, 'activation0': 'swish', 'dropout0': 0.1160693892717361, 'n_units_l1': 9, 'activation1': 'relu', 'dropout1': 0.20025517802875564, 'n_units_l2': 378, 'activation2': 'sigmoid', 'dropout2': 0.07153459804069467, 'finalact1': 'relu', 'LRfactor': 0.1900756419582023, 'LRpatience': 17, 'optimizer': 'RMSprop'}. Best is trial 5 with value: 0.00322540826164186. [I 2023-05-07 11:10:34,637] Trial 9 finished with value: 1.1783032417297363 and parameters: {'n_layers': 2, 'n_units_l0': 11, 'activation0': 'linear', 'dropout0': 0.04126559423438614, 'n_units_l1': 27, 'activation1': 'linear', 'dropout1': 0.13390321416904383, 'finalact1': 'sigmoid', 'LRfactor': 0.22921446367229276, 'LRpatience': 13, 'optimizer': 'Adagrad'}. Best is trial 5 with value: 0.00322540826164186. [I 2023-05-07 11:10:57,212] Trial 10 finished with value: 1.5341397523880005 and parameters: {'n_layers': 4, 'n_units_l0': 163, 'activation0': 'linear', 'dropout0': 0.27646044672655473, 'n_units_l1': 274, 'activation1': 'relu', 'dropout1': 0.48891494883669884, 'n_units_l2': 11, 'activation2': 'swish', 'dropout2': 0.2941606008209286, 'n_units_l3': 150, 'activation3': 'linear', 'dropout3': 0.012983477657974696, 'finalact1': 'swish', 'LRfactor': 0.44629835750848595, 'LRpatience': 10, 'optimizer': 'sgd'}. Best is trial 5 with value: 0.00322540826164186. [I 2023-05-07 11:11:17,170] Trial 11 finished with value: 1.5433343648910522 and parameters: {'n_layers': 4, 'n_units_l0': 50, 'activation0': 'linear', 'dropout0': 0.3168088319135424, 'n_units_l1': 119, 'activation1': 'sigmoid', 'dropout1': 0.3593436093939495, 'n_units_l2': 227, 'activation2': 'swish', 'dropout2': 0.24086941943022075, 'n_units_l3': 9, 'activation3': 'sigmoid', 'dropout3': 0.4198890915687169, 'finalact1': 'swish', 'LRfactor': 0.027744377684452003, 'LRpatience': 20, 'optimizer': 'sgd'}. Best is trial 5 with value: 0.00322540826164186. [I 2023-05-07 11:11:46,642] Trial 12 finished with value: 1.5643864870071411 and parameters: {'n_layers': 4, 'n_units_l0': 177, 'activation0': 'linear', 'dropout0': 0.34789740193348506, 'n_units_l1': 376, 'activation1': 'relu', 'dropout1': 0.3986454607115836, 'n_units_l2': 153, 'activation2': 'sigmoid', 'dropout2': 0.2067439216116647, 'n_units_l3': 62, 'activation3': 'linear', 'dropout3': 0.2871778657138313, 'finalact1': 'swish', 'LRfactor': 0.14872791284430537, 'LRpatience': 9, 'optimizer': 'Adagrad'}. Best is trial 5 with value: 0.00322540826164186. [I 2023-05-07 11:11:56,434] Trial 13 finished with value: 0.004265576135367155 and parameters: {'n_layers': 2, 'n_units_l0': 9, 'activation0': 'linear', 'dropout0': 0.4238582470853339, 'n_units_l1': 123, 'activation1': 'sigmoid', 'dropout1': 0.28436832978467913, 'finalact1': 'linear', 'LRfactor': 0.0012120098587045658, 'LRpatience': 13, 'optimizer': 'RMSprop'}. Best is trial 5 with value: 0.00322540826164186. 
[I 2023-05-07 11:12:03,218] Trial 14 finished with value: 0.027070017531514168 and parameters: {'n_layers': 1, 'n_units_l0': 70, 'activation0': 'relu', 'dropout0': 0.47301388673355227, 'finalact1': 'swish', 'LRfactor': 0.12532530098806513, 'LRpatience': 8, 'optimizer': 'RMSprop'}. Best is trial 5 with value: 0.00322540826164186. [I 2023-05-07 11:12:20,963] Trial 15 finished with value: 0.025294087827205658 and parameters: {'n_layers': 2, 'n_units_l0': 309, 'activation0': 'linear', 'dropout0': 0.21369302221842137, 'n_units_l1': 84, 'activation1': 'sigmoid', 'dropout1': 0.43161586756782394, 'finalact1': 'linear', 'LRfactor': 0.2396016571598094, 'LRpatience': 13, 'optimizer': 'RMSprop'}. Best is trial 5 with value: 0.00322540826164186. [I 2023-05-07 11:12:38,394] Trial 16 finished with value: 0.004644718021154404 and parameters: {'n_layers': 2, 'n_units_l0': 21, 'activation0': 'linear', 'dropout0': 0.4123611720108736, 'n_units_l1': 347, 'activation1': 'sigmoid', 'dropout1': 0.3026878800254376, 'finalact1': 'swish', 'LRfactor': 0.05544487560604878, 'LRpatience': 7, 'optimizer': 'RMSprop'}. Best is trial 5 with value: 0.00322540826164186. [I 2023-05-07 11:12:44,585] Trial 17 finished with value: 0.3941434323787689 and parameters: {'n_layers': 1, 'n_units_l0': 44, 'activation0': 'linear', 'dropout0': 0.4897715014242243, 'finalact1': 'sigmoid', 'LRfactor': 0.0025004315916354455, 'LRpatience': 11, 'optimizer': 'RMSprop'}. Best is trial 5 with value: 0.00322540826164186. [I 2023-05-07 11:12:58,415] Trial 18 finished with value: 0.002885078079998493 and parameters: {'n_layers': 2, 'n_units_l0': 91, 'activation0': 'relu', 'dropout0': 0.26396149460252005, 'n_units_l1': 199, 'activation1': 'relu', 'dropout1': 0.4977693841991701, 'finalact1': 'linear', 'LRfactor': 0.08414884149563667, 'LRpatience': 14, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:13:05,809] Trial 19 finished with value: 0.13584588468074799 and parameters: {'n_layers': 1, 'n_units_l0': 105, 'activation0': 'relu', 'dropout0': 0.25639494944911345, 'finalact1': 'swish', 'LRfactor': 0.16230845804082739, 'LRpatience': 15, 'optimizer': 'Adagrad'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:13:32,736] Trial 20 finished with value: 1.5293538570404053 and parameters: {'n_layers': 4, 'n_units_l0': 206, 'activation0': 'relu', 'dropout0': 0.18449042561503715, 'n_units_l1': 218, 'activation1': 'relu', 'dropout1': 0.49820424779805195, 'n_units_l2': 89, 'activation2': 'swish', 'dropout2': 0.34497538472243366, 'n_units_l3': 328, 'activation3': 'relu', 'dropout3': 0.29832957490383416, 'finalact1': 'linear', 'LRfactor': 0.08910860503188296, 'LRpatience': 11, 'optimizer': 'sgd'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:13:47,460] Trial 21 finished with value: 0.011378061957657337 and parameters: {'n_layers': 2, 'n_units_l0': 111, 'activation0': 'relu', 'dropout0': 0.00021943448144223776, 'n_units_l1': 207, 'activation1': 'relu', 'dropout1': 0.4365505865756895, 'finalact1': 'linear', 'LRfactor': 0.06943987485384019, 'LRpatience': 14, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. 
[I 2023-05-07 11:14:05,039] Trial 22 finished with value: 0.3658670485019684 and parameters: {'n_layers': 2, 'n_units_l0': 38, 'activation0': 'sigmoid', 'dropout0': 0.2881123084348658, 'n_units_l1': 389, 'activation1': 'sigmoid', 'dropout1': 0.37781994799356255, 'finalact1': 'linear', 'LRfactor': 0.12033569428144326, 'LRpatience': 12, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:14:17,730] Trial 23 finished with value: 0.0032544746063649654 and parameters: {'n_layers': 2, 'n_units_l0': 86, 'activation0': 'relu', 'dropout0': 0.22865952254516478, 'n_units_l1': 163, 'activation1': 'relu', 'dropout1': 0.4688600720368829, 'finalact1': 'linear', 'LRfactor': 0.05051303497332628, 'LRpatience': 7, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:14:42,793] Trial 24 finished with value: 0.00391697371378541 and parameters: {'n_layers': 3, 'n_units_l0': 385, 'activation0': 'relu', 'dropout0': 0.2329903480289704, 'n_units_l1': 186, 'activation1': 'relu', 'dropout1': 0.4548066269089569, 'n_units_l2': 46, 'activation2': 'linear', 'dropout2': 0.005062470922051254, 'finalact1': 'linear', 'LRfactor': 0.19158700310519805, 'LRpatience': 7, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:14:58,969] Trial 25 finished with value: 0.003053093096241355 and parameters: {'n_layers': 2, 'n_units_l0': 80, 'activation0': 'relu', 'dropout0': 0.19129186747965715, 'n_units_l1': 274, 'activation1': 'relu', 'dropout1': 0.4997994500929897, 'finalact1': 'swish', 'LRfactor': 0.04739209218194274, 'LRpatience': 7, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:15:07,688] Trial 26 finished with value: 0.015591222792863846 and parameters: {'n_layers': 1, 'n_units_l0': 143, 'activation0': 'relu', 'dropout0': 0.187153256319322, 'finalact1': 'swish', 'LRfactor': 0.08417010798837408, 'LRpatience': 9, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:15:30,057] Trial 27 finished with value: 0.008950270712375641 and parameters: {'n_layers': 3, 'n_units_l0': 82, 'activation0': 'relu', 'dropout0': 0.11052111497463529, 'n_units_l1': 278, 'activation1': 'relu', 'dropout1': 0.49494113848298504, 'n_units_l2': 206, 'activation2': 'swish', 'dropout2': 0.3528334448205269, 'finalact1': 'swish', 'LRfactor': 0.19356765711735355, 'LRpatience': 6, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:15:47,643] Trial 28 finished with value: 2.0145208835601807 and parameters: {'n_layers': 2, 'n_units_l0': 195, 'activation0': 'relu', 'dropout0': 0.25789041947608193, 'n_units_l1': 282, 'activation1': 'relu', 'dropout1': 0.4130234723691704, 'finalact1': 'swish', 'LRfactor': 0.12162057253568848, 'LRpatience': 9, 'optimizer': 'Adagrad'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:16:12,301] Trial 29 finished with value: 1.5346497297286987 and parameters: {'n_layers': 3, 'n_units_l0': 64, 'activation0': 'sigmoid', 'dropout0': 0.16341457085974403, 'n_units_l1': 396, 'activation1': 'relu', 'dropout1': 0.4370320563046802, 'n_units_l2': 141, 'activation2': 'swish', 'dropout2': 0.38198689703324284, 'finalact1': 'swish', 'LRfactor': 0.26793767425855974, 'LRpatience': 5, 'optimizer': 'sgd'}. Best is trial 18 with value: 0.002885078079998493. 
[I 2023-05-07 11:16:33,837] Trial 30 finished with value: 0.05346528813242912 and parameters: {'n_layers': 2, 'n_units_l0': 233, 'activation0': 'swish', 'dropout0': 0.184296721179856, 'n_units_l1': 270, 'activation1': 'linear', 'dropout1': 0.3668641796750447, 'finalact1': 'relu', 'LRfactor': 0.04259049358646333, 'LRpatience': 8, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:16:46,779] Trial 31 finished with value: 0.005324834957718849 and parameters: {'n_layers': 2, 'n_units_l0': 83, 'activation0': 'relu', 'dropout0': 0.21509940834617894, 'n_units_l1': 162, 'activation1': 'relu', 'dropout1': 0.46358965752759845, 'finalact1': 'linear', 'LRfactor': 0.05014877909285677, 'LRpatience': 7, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:17:01,549] Trial 32 finished with value: 0.40160778164863586 and parameters: {'n_layers': 2, 'n_units_l0': 126, 'activation0': 'relu', 'dropout0': 0.24291788242808804, 'n_units_l1': 188, 'activation1': 'relu', 'dropout1': 0.46674747615370515, 'finalact1': 'sigmoid', 'LRfactor': 0.0469982932077216, 'LRpatience': 6, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:17:18,926] Trial 33 finished with value: 0.005144442897289991 and parameters: {'n_layers': 2, 'n_units_l0': 157, 'activation0': 'relu', 'dropout0': 0.16470373667779242, 'n_units_l1': 270, 'activation1': 'relu', 'dropout1': 0.49945177473351454, 'finalact1': 'linear', 'LRfactor': 0.30355366948250706, 'LRpatience': 8, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:17:26,599] Trial 34 finished with value: 0.032795391976833344 and parameters: {'n_layers': 1, 'n_units_l0': 91, 'activation0': 'relu', 'dropout0': 0.19453626913946617, 'finalact1': 'swish', 'LRfactor': 0.02271056089620299, 'LRpatience': 6, 'optimizer': 'adam'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:17:43,302] Trial 35 finished with value: 0.006050636526197195 and parameters: {'n_layers': 2, 'n_units_l0': 243, 'activation0': 'relu', 'dropout0': 0.14924744764062176, 'n_units_l1': 97, 'activation1': 'relu', 'dropout1': 0.4629920488723728, 'finalact1': 'linear', 'LRfactor': 0.08140281413488404, 'LRpatience': 11, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:17:58,744] Trial 36 finished with value: 0.054259635508060455 and parameters: {'n_layers': 3, 'n_units_l0': 58, 'activation0': 'sigmoid', 'dropout0': 0.12916838727125235, 'n_units_l1': 157, 'activation1': 'linear', 'dropout1': 0.411435733040811, 'n_units_l2': 24, 'activation2': 'relu', 'dropout2': 0.2796179736860604, 'finalact1': 'relu', 'LRfactor': 0.10450070053559433, 'LRpatience': 15, 'optimizer': 'adam'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:18:13,237] Trial 37 finished with value: 0.057130251079797745 and parameters: {'n_layers': 2, 'n_units_l0': 101, 'activation0': 'swish', 'dropout0': 0.22074941058040431, 'n_units_l1': 198, 'activation1': 'relu', 'dropout1': 0.4572810710216848, 'finalact1': 'linear', 'LRfactor': 0.06868430633896297, 'LRpatience': 10, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. 
[I 2023-05-07 11:18:36,865] Trial 38 finished with value: 0.3955347239971161 and parameters: {'n_layers': 3, 'n_units_l0': 130, 'activation0': 'relu', 'dropout0': 0.27313077826443627, 'n_units_l1': 318, 'activation1': 'relu', 'dropout1': 0.403622718526646, 'n_units_l2': 69, 'activation2': 'linear', 'dropout2': 0.49631942165515497, 'finalact1': 'sigmoid', 'LRfactor': 0.030713418408253788, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:18:44,281] Trial 39 finished with value: 0.003721388755366206 and parameters: {'n_layers': 1, 'n_units_l0': 73, 'activation0': 'relu', 'dropout0': 0.09900655476713277, 'finalact1': 'swish', 'LRfactor': 0.14875254921836642, 'LRpatience': 5, 'optimizer': 'adam'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:19:05,428] Trial 40 finished with value: 0.007683604024350643 and parameters: {'n_layers': 3, 'n_units_l0': 141, 'activation0': 'swish', 'dropout0': 0.14466101739578113, 'n_units_l1': 223, 'activation1': 'relu', 'dropout1': 0.47213537811583645, 'n_units_l2': 96, 'activation2': 'swish', 'dropout2': 0.20896411954458682, 'finalact1': 'relu', 'LRfactor': 0.06414135669581414, 'LRpatience': 12, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:19:12,749] Trial 41 finished with value: 0.03244902193546295 and parameters: {'n_layers': 1, 'n_units_l0': 72, 'activation0': 'relu', 'dropout0': 0.10109970709214781, 'finalact1': 'swish', 'LRfactor': 0.144917593934197, 'LRpatience': 5, 'optimizer': 'adam'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:19:20,409] Trial 42 finished with value: 0.018656669184565544 and parameters: {'n_layers': 1, 'n_units_l0': 89, 'activation0': 'relu', 'dropout0': 0.20227842947767938, 'finalact1': 'swish', 'LRfactor': 0.10333356797628393, 'LRpatience': 6, 'optimizer': 'adam'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:19:28,681] Trial 43 finished with value: 0.009244161657989025 and parameters: {'n_layers': 1, 'n_units_l0': 117, 'activation0': 'relu', 'dropout0': 0.09790341842807129, 'finalact1': 'swish', 'LRfactor': 0.16496510580868182, 'LRpatience': 7, 'optimizer': 'adam'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:19:50,324] Trial 44 finished with value: 0.005784999113529921 and parameters: {'n_layers': 4, 'n_units_l0': 59, 'activation0': 'relu', 'dropout0': 0.07503476894821479, 'n_units_l1': 150, 'activation1': 'relu', 'dropout1': 0.4443550923672621, 'n_units_l2': 156, 'activation2': 'swish', 'dropout2': 0.4212441864265019, 'n_units_l3': 41, 'activation3': 'linear', 'dropout3': 0.4911800701000524, 'finalact1': 'swish', 'LRfactor': 0.2222160643102394, 'LRpatience': 5, 'optimizer': 'adam'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:20:04,053] Trial 45 finished with value: 1.5388635396957397 and parameters: {'n_layers': 2, 'n_units_l0': 76, 'activation0': 'linear', 'dropout0': 0.23506436659082755, 'n_units_l1': 240, 'activation1': 'linear', 'dropout1': 0.49780895583733303, 'finalact1': 'swish', 'LRfactor': 0.1073751681366005, 'LRpatience': 16, 'optimizer': 'Adagrad'}. Best is trial 18 with value: 0.002885078079998493. 
[I 2023-05-07 11:20:19,966] Trial 46 finished with value: 1.5287742614746094 and parameters: {'n_layers': 2, 'n_units_l0': 97, 'activation0': 'sigmoid', 'dropout0': 0.1737837678105413, 'n_units_l1': 324, 'activation1': 'swish', 'dropout1': 0.42784753003421816, 'finalact1': 'linear', 'LRfactor': 0.1332456042062734, 'LRpatience': 8, 'optimizer': 'sgd'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:20:29,458] Trial 47 finished with value: 0.005088047124445438 and parameters: {'n_layers': 1, 'n_units_l0': 149, 'activation0': 'linear', 'dropout0': 0.1451510684857398, 'finalact1': 'swish', 'LRfactor': 0.03579202576578121, 'LRpatience': 14, 'optimizer': 'adam'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:20:56,653] Trial 48 finished with value: 0.003303260775282979 and parameters: {'n_layers': 3, 'n_units_l0': 171, 'activation0': 'relu', 'dropout0': 0.2982967028180327, 'n_units_l1': 174, 'activation1': 'relu', 'dropout1': 0.3470014572925372, 'n_units_l2': 374, 'activation2': 'relu', 'dropout2': 0.32463755484079754, 'finalact1': 'linear', 'LRfactor': 0.08924525823601982, 'LRpatience': 9, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:21:30,967] Trial 49 finished with value: 0.01148599199950695 and parameters: {'n_layers': 4, 'n_units_l0': 299, 'activation0': 'linear', 'dropout0': 0.30629468978935875, 'n_units_l1': 179, 'activation1': 'relu', 'dropout1': 0.35446306909124214, 'n_units_l2': 377, 'activation2': 'relu', 'dropout2': 0.3268506167929679, 'n_units_l3': 18, 'activation3': 'relu', 'dropout3': 0.1618001965225433, 'finalact1': 'linear', 'LRfactor': 0.014601736924677193, 'LRpatience': 9, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:21:54,114] Trial 50 finished with value: 0.21399347484111786 and parameters: {'n_layers': 4, 'n_units_l0': 119, 'activation0': 'relu', 'dropout0': 0.24918098204396683, 'n_units_l1': 141, 'activation1': 'relu', 'dropout1': 0.33391216421596653, 'n_units_l2': 246, 'activation2': 'relu', 'dropout2': 0.40980381359498164, 'n_units_l3': 69, 'activation3': 'sigmoid', 'dropout3': 0.3719112593440381, 'finalact1': 'linear', 'LRfactor': 0.08438774854368403, 'LRpatience': 10, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:22:17,526] Trial 51 finished with value: 0.0035813406575471163 and parameters: {'n_layers': 3, 'n_units_l0': 178, 'activation0': 'relu', 'dropout0': 0.20535674536995024, 'n_units_l1': 241, 'activation1': 'swish', 'dropout1': 0.3861793338335694, 'n_units_l2': 124, 'activation2': 'relu', 'dropout2': 0.29623909413946503, 'finalact1': 'linear', 'LRfactor': 0.06141609097550135, 'LRpatience': 7, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:22:40,541] Trial 52 finished with value: 0.0032448184210807085 and parameters: {'n_layers': 3, 'n_units_l0': 168, 'activation0': 'relu', 'dropout0': 0.22779927652937326, 'n_units_l1': 246, 'activation1': 'swish', 'dropout1': 0.4006129416693867, 'n_units_l2': 115, 'activation2': 'relu', 'dropout2': 0.3022520046370623, 'finalact1': 'linear', 'LRfactor': 0.06101066818498092, 'LRpatience': 7, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. 
[I 2023-05-07 11:23:10,407] Trial 53 finished with value: 0.003900539828464389 and parameters: {'n_layers': 3, 'n_units_l0': 244, 'activation0': 'relu', 'dropout0': 0.22495484603970303, 'n_units_l1': 301, 'activation1': 'swish', 'dropout1': 0.4122756891707664, 'n_units_l2': 173, 'activation2': 'relu', 'dropout2': 0.33230845865550945, 'finalact1': 'linear', 'LRfactor': 0.0032458748779490565, 'LRpatience': 8, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:23:37,893] Trial 54 finished with value: 0.0032519742380827665 and parameters: {'n_layers': 3, 'n_units_l0': 337, 'activation0': 'relu', 'dropout0': 0.26941308666976943, 'n_units_l1': 219, 'activation1': 'swish', 'dropout1': 0.4696574411652622, 'n_units_l2': 91, 'activation2': 'relu', 'dropout2': 0.3801311205123023, 'finalact1': 'linear', 'LRfactor': 0.03391348574875666, 'LRpatience': 9, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:24:03,967] Trial 55 finished with value: 0.025077136233448982 and parameters: {'n_layers': 2, 'n_units_l0': 393, 'activation0': 'relu', 'dropout0': 0.26663062201439736, 'n_units_l1': 237, 'activation1': 'swish', 'dropout1': 0.4770828648588398, 'finalact1': 'linear', 'LRfactor': 0.03397155929700725, 'LRpatience': 10, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:24:36,824] Trial 56 finished with value: 0.0030037229880690575 and parameters: {'n_layers': 3, 'n_units_l0': 337, 'activation0': 'linear', 'dropout0': 0.2788925874698209, 'n_units_l1': 331, 'activation1': 'swish', 'dropout1': 0.47504900202508393, 'n_units_l2': 91, 'activation2': 'relu', 'dropout2': 0.3803784230606309, 'finalact1': 'linear', 'LRfactor': 0.023909768763848274, 'LRpatience': 14, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:25:09,554] Trial 57 finished with value: 0.003051608568057418 and parameters: {'n_layers': 3, 'n_units_l0': 325, 'activation0': 'linear', 'dropout0': 0.3269062989788235, 'n_units_l1': 339, 'activation1': 'swish', 'dropout1': 0.44099426290966687, 'n_units_l2': 90, 'activation2': 'relu', 'dropout2': 0.3727934274146749, 'finalact1': 'linear', 'LRfactor': 0.01846193374928106, 'LRpatience': 16, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:25:39,639] Trial 58 finished with value: 0.39339444041252136 and parameters: {'n_layers': 3, 'n_units_l0': 270, 'activation0': 'linear', 'dropout0': 0.3347052570930846, 'n_units_l1': 335, 'activation1': 'swish', 'dropout1': 0.3909884292306449, 'n_units_l2': 115, 'activation2': 'relu', 'dropout2': 0.3805472567384719, 'finalact1': 'sigmoid', 'LRfactor': 0.01713053204907774, 'LRpatience': 16, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.002885078079998493. [I 2023-05-07 11:26:17,544] Trial 59 finished with value: 2.08960223197937 and parameters: {'n_layers': 4, 'n_units_l0': 336, 'activation0': 'linear', 'dropout0': 0.29552805230072954, 'n_units_l1': 394, 'activation1': 'swish', 'dropout1': 0.44199083857719856, 'n_units_l2': 62, 'activation2': 'relu', 'dropout2': 0.2636858915092423, 'n_units_l3': 130, 'activation3': 'swish', 'dropout3': 0.36374165033102035, 'finalact1': 'linear', 'LRfactor': 0.0010076209227722552, 'LRpatience': 14, 'optimizer': 'Adagrad'}. Best is trial 18 with value: 0.002885078079998493.
Number of finished trials: 60
Best trial:
  Value: 0.002885078079998493
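# The search itself can be inspected with Optuna's built-in plotly visualizations
# (a quick sketch, not part of the original run):
from optuna.visualization import plot_optimization_history, plot_param_importances
plot_optimization_history(study).show()   # best value vs. trial number
plot_param_importances(study).show()      # relative importance of each hyperparameter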
print(" Params: ")
for key, value in trial.params.items():
    print(" {}: {}".format(key, value))
Params:
n_layers: 2
n_units_l0: 91
activation0: relu
dropout0: 0.26396149460252005
n_units_l1: 199
activation1: relu
dropout1: 0.4977693841991701
finalact1: linear
LRfactor: 0.08414884149563667
LRpatience: 14
optimizer: RMSprop
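# Instead of copying the winning hyperparameters by hand (as done below), the model can be
# rebuilt directly from study.best_trial.params; a sketch using the same parameter names
# searched in objective() (build_from_params is a helper introduced here, not from the notebook):
def build_from_params(p, n_features):
    model = keras.Sequential()
    for i in range(p['n_layers']):
        model.add(keras.layers.LSTM(p[f'n_units_l{i}'], input_shape=(1, n_features),
                                    return_sequences=True, activation=p[f'activation{i}']))
        model.add(keras.layers.Dropout(p[f'dropout{i}']))
    model.add(keras.layers.Dense(1, activation=p['finalact1']))
    model.compile(optimizer=p['optimizer'], loss='mse')
    return model

best_model = build_from_params(study.best_trial.params, train.shape[1] - 1)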
# Best architecture from Optuna, trained on the same train/validation split used during the search
model =Sequential()
model.add(keras.layers.LSTM(91,input_shape=(1, train.shape[1]-1),return_sequences=True,activation=tf.keras.activations.relu))
model.add(keras.layers.Dropout(0.26))
model.add(keras.layers.LSTM(199,input_shape=(1, train.shape[1]-1),return_sequences=True,activation=tf.keras.activations.relu))
model.add(keras.layers.Dropout(0.49))
model.add(keras.layers.Dense(1,activation=tf.keras.activations.linear))
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.08,patience=14,min_lr=1e-05,verbose=0)
early_stopping = EarlyStopping(monitor="val_loss",min_delta=0,patience=14,verbose=0,mode="auto",restore_best_weights=True)
model.compile(optimizer='RMSprop',loss='mse')
val_ds = (xval_n,yval_n)
history = model.fit(xtrain_n,ytrain_n,validation_data=val_ds,epochs=200,verbose=0,callbacks=[early_stopping,reduce_lr])
hist=pd.DataFrame(history.history)
hist['epoch']=history.epoch
trace1 = go.Scatter(
x=hist['epoch'], y=hist['loss'],
mode='lines', name='Train_loss'
)
trace2 = go.Scatter(
x=hist['epoch'], y=hist['val_loss'],
mode='lines', name='Val_loss'
)
layout= go.Layout(
title= 'Loss history of the best Optuna model',
xaxis={'title':'Epoch'},
yaxis={'title':'Loss'}
)
fig= go.Figure(data=[trace1,trace2], layout=layout)
fig.show()
fig.write_html("Optuna Loss best model.html")
# Best architecture from Optuna, retrained on the full training set (train + validation)
model =Sequential()
model.add(keras.layers.LSTM(91,input_shape=(1, train.shape[1]-1),return_sequences=True,activation=tf.keras.activations.relu))
model.add(keras.layers.Dropout(0.26))
model.add(keras.layers.LSTM(199,input_shape=(1, train.shape[1]-1),return_sequences=True,activation=tf.keras.activations.relu))
model.add(keras.layers.Dropout(0.49))
model.add(keras.layers.Dense(1,activation=tf.keras.activations.linear))
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.08,patience=14,min_lr=1e-05,verbose=0)
early_stopping = EarlyStopping(monitor="loss",min_delta=0,patience=14,verbose=0,mode="auto",restore_best_weights=True)
model.compile(optimizer='RMSprop',loss='mse')
history = model.fit(xtrainall_n,ytrainall_n,epochs=200,verbose=1,callbacks=[early_stopping,reduce_lr])
Epoch 1/200 64/64 [==============================] - 1s 4ms/step - loss: 0.0394 - lr: 0.0010 Epoch 2/200 64/64 [==============================] - 0s 4ms/step - loss: 0.0036 - lr: 0.0010 Epoch 3/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0030 - lr: 0.0010 Epoch 4/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0030 - lr: 0.0010 Epoch 5/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0030 - lr: 0.0010 Epoch 6/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0029 - lr: 0.0010 Epoch 7/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0032 - lr: 0.0010 Epoch 8/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0028 - lr: 0.0010 Epoch 9/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0027 - lr: 0.0010 Epoch 10/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0024 - lr: 0.0010 Epoch 11/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0027 - lr: 0.0010 Epoch 12/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0024 - lr: 0.0010 Epoch 13/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0025 - lr: 0.0010 Epoch 14/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0022 - lr: 0.0010 Epoch 15/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0025 - lr: 0.0010 Epoch 16/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0024 - lr: 0.0010 Epoch 17/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0025 - lr: 0.0010 Epoch 18/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0024 - lr: 0.0010 Epoch 19/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0024 - lr: 0.0010 Epoch 20/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0022 - lr: 0.0010 Epoch 21/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0024 - lr: 0.0010 Epoch 22/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0022 - lr: 0.0010 Epoch 23/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0024 - lr: 0.0010 Epoch 24/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0023 - lr: 0.0010 Epoch 25/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0025 - lr: 0.0010 Epoch 26/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0024 - lr: 0.0010 Epoch 27/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0022 - lr: 0.0010 Epoch 28/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0023 - lr: 0.0010 Epoch 29/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0022 - lr: 8.0000e-05 Epoch 30/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0021 - lr: 8.0000e-05 Epoch 31/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0021 - lr: 8.0000e-05 Epoch 32/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0021 - lr: 8.0000e-05 Epoch 33/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0023 - lr: 8.0000e-05 Epoch 34/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0020 - lr: 8.0000e-05 Epoch 35/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0020 - lr: 8.0000e-05 Epoch 36/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0024 - lr: 8.0000e-05 Epoch 37/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0018 - lr: 8.0000e-05 Epoch 38/200 64/64 
[==============================] - 0s 3ms/step - loss: 0.0021 - lr: 8.0000e-05 Epoch 39/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0020 - lr: 8.0000e-05 Epoch 40/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0022 - lr: 8.0000e-05 Epoch 41/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0024 - lr: 8.0000e-05 Epoch 42/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0021 - lr: 8.0000e-05 Epoch 43/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0022 - lr: 8.0000e-05 Epoch 44/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0021 - lr: 8.0000e-05 Epoch 45/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0020 - lr: 8.0000e-05 Epoch 46/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0022 - lr: 8.0000e-05 Epoch 47/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0021 - lr: 8.0000e-05 Epoch 48/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0022 - lr: 8.0000e-05 Epoch 49/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0023 - lr: 8.0000e-05 Epoch 50/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0020 - lr: 8.0000e-05 Epoch 51/200 64/64 [==============================] - 0s 3ms/step - loss: 0.0021 - lr: 8.0000e-05
prediction=model.predict(xtest_n)
17/17 [==============================] - 0s 2ms/step
final_values=[]
for i in range(len(prediction)):
    final_values.append(prediction[i][0][0])
df_final = pd.DataFrame(0, index=np.arange(len(test)), columns=test.columns)
df_final['MSFT']=final_values
df_final = scalerall.inverse_transform(df_final)
final_values_rescaled=[]
for i in range(len(df_final)):
    final_values_rescaled.append(df_final[i][0])
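# Building a zero-filled frame just to call inverse_transform works, but the rescaling of the
# first column can also be done directly from the fitted MinMaxScaler, which stores the affine
# parameters in scale_ and min_ (transform is x_scaled = x * scale_ + min_). A sketch:
pred_scaled = np.array(final_values)
final_values_rescaled_alt = (pred_scaled - scalerall.min_[0]) / scalerall.scale_[0]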
trace1 = go.Scatter(
x=pd.to_datetime(traindatesall), y=trainall.iloc[:,0],
mode='lines', name='Datatrain'
)
trace2 = go.Scatter(
x=pd.to_datetime(testdates), y=test.iloc[:,0],
mode='lines', name='Datatest'
)
trace3 = go.Scatter(
x=pd.to_datetime(testdates), y=final_values_rescaled,
mode='lines', name='Prediction'
)
layout= go.Layout(
title= 'MSFT Forecast',
xaxis={'title':'Date'},
yaxis={'title':'Close'}
)
fig= go.Figure(data=[trace1,trace2,trace3], layout=layout)
fig.show()
from sklearn.metrics import r2_score,mean_squared_error
import math
r2_score(test.iloc[:,0], final_values_rescaled)
0.9262641734398558
mse = mean_squared_error(test.iloc[:,0], final_values_rescaled)
rmse = math.sqrt(mse)
rmse
8.46748237389731
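# R2 and RMSE can be complemented with absolute and relative errors; a sketch using
# sklearn.metrics (mean_absolute_percentage_error requires a recent scikit-learn):
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error
mae = mean_absolute_error(test.iloc[:, 0], final_values_rescaled)
mape = mean_absolute_percentage_error(test.iloc[:, 0], final_values_rescaled)
print(f"MAE: {mae:.3f}  MAPE: {mape:.3%}")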
plt.plot(test.iloc[:,0], final_values_rescaled, 'ro')
plt.show()
data_new=pdr.get_data_yahoo('MSFT','2023-02-14','2023-05-05')
data_new=data_new[['Adj Close']]
data_new.columns=[['MSFT']]
shift_data(data_new,'MSFT',9)
data_new=data_new.dropna()
test_new = pd.DataFrame(scalerall.transform(data_new), columns=data_new.columns)
xtestnew=test_new.iloc[:,1:]
ytestnew=test_new.iloc[:,0]
xtestnew=xtestnew.to_numpy().reshape(-1,1,test_new.shape[1]-1)
ytestnew=ytestnew.to_numpy().reshape(-1,1,1)
prediction_new=model.predict(xtestnew)
2/2 [==============================] - 0s 3ms/step
test_new_dates=data_new.index
final_values_new=[]
for i in range(len(prediction_new)):
    final_values_new.append(prediction_new[i][0][0])
df_final_new = pd.DataFrame(0, index=np.arange(len(test_new)), columns=test_new.columns)
df_final_new['MSFT']=final_values_new
df_final_new = scalerall.inverse_transform(df_final_new)
final_values_rescaled_new=[]
for i in range(len(df_final_new)):
    final_values_rescaled_new.append(df_final_new[i][0])
trace1 = go.Scatter(
x=pd.to_datetime(test_new_dates), y=data_new['MSFT'].iloc[:,0],
mode='lines', name='Real'
)
trace2 = go.Scatter(
x=pd.to_datetime(test_new_dates), y=final_values_rescaled_new,
mode='lines', name='Prediction_new'
)
trace3 = go.Scatter(
x=pd.to_datetime(traindatesall), y=trainall.iloc[:,0],
mode='lines', name='Datatrain'
)
trace4 = go.Scatter(
x=pd.to_datetime(testdates), y=test.iloc[:,0],
mode='lines', name='Datatest'
)
trace5 = go.Scatter(
x=pd.to_datetime(testdates), y=final_values_rescaled,
mode='lines', name='Prediction'
)
layout= go.Layout(
title= 'MSFT Forecast March - April',
xaxis={'title':'Date'},
yaxis={'title':'Close'}
)
fig= go.Figure(data=[trace1,trace2,trace3,trace4,trace5], layout=layout)
fig.show()
mse = mean_squared_error(data_new['MSFT'].iloc[:,0], final_values_rescaled_new)
rmse = math.sqrt(mse)
rmse
9.705853575282516
model_json = model.to_json()
with open("modelMSFT.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("modelMSFT.h5")
import joblib
joblib.dump(scalerall, 'scalerMSFT.gz')
['scalerMSFT.gz']
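# For later inference, the saved artifacts can be restored as follows (a sketch; same file
# names as above):
from keras.models import model_from_json
import joblib
with open("modelMSFT.json") as json_file:
    loaded_model = model_from_json(json_file.read())
loaded_model.load_weights("modelMSFT.h5")
loaded_model.compile(optimizer='RMSprop', loss='mse')
scaler_loaded = joblib.load('scalerMSFT.gz')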